Domain memory allocations now go through the buddy allocator. The page allocator gains two zones, MEMZONE_XEN for Xen's own heap and MEMZONE_DOM for domain memory; the old free_list/free_pfns page pool is removed, and callers switch to alloc_domheap_page()/free_domheap_page(), with avail_domheap_pages() replacing the global free_pfns counter.
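
The split is easiest to see from a caller's point of view. Below is a minimal sketch, not part of the patch: grab_domain_frame() is a hypothetical helper, the include locations are assumed, and it mirrors only the heap interaction of the reworked alloc_domain_page(), skipping the ownership and reference-count bookkeeping.

    #include <xen/mm.h>     /* alloc_domheap_page(), free_domheap_page() */
    #include <xen/sched.h>  /* struct domain (assumed location) */

    /* Hypothetical helper: take one frame from the domain heap and account
     * it to domain @d, in the style of the reworked alloc_domain_page(). */
    static struct pfn_info *grab_domain_frame(struct domain *d)
    {
        struct pfn_info *page;

        /* Domain memory now comes from the MEMZONE_DOM buddy free lists. */
        if ( (page = alloc_domheap_page()) == NULL )
            return NULL;

        spin_lock(&d->page_alloc_lock);
        if ( d->tot_pages >= d->max_pages )
        {
            /* Over the domain's limit: hand the frame straight back. */
            spin_unlock(&d->page_alloc_lock);
            free_domheap_page(page);
            return NULL;
        }
        d->tot_pages++;
        list_add_tail(&page->list, &d->page_list);
        spin_unlock(&d->page_alloc_lock);

        return page;
    }

Xen's own allocations keep using alloc_xenheap_pages()/free_xenheap_pages(), which now draw from MEMZONE_XEN and do the memguard (un)guarding themselves.
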
__machine_halt(NULL);
}
+void free_perdomain_pt(struct domain *d)
+{
+ free_xenheap_page((unsigned long)d->mm.perdomain_pt);
+}
+
void arch_do_createdomain(struct domain *d)
{
d->shared_info = (void *)alloc_xenheap_page();
**/
-static inline void free_shadow_page( struct mm_struct *m,
- struct pfn_info *pfn_info )
+static inline void free_shadow_page(struct mm_struct *m,
+ struct pfn_info *page)
{
- unsigned long flags;
- unsigned long type = pfn_info->u.inuse.type_info & PGT_type_mask;
+ unsigned long type = page->u.inuse.type_info & PGT_type_mask;
m->shadow_page_count--;
else if (type == PGT_l2_page_table)
perfc_decr(shadow_l2_pages);
else printk("Free shadow weird page type pfn=%08x type=%08x\n",
- frame_table-pfn_info, pfn_info->u.inuse.type_info);
+ page - frame_table, page->u.inuse.type_info);
- pfn_info->u.inuse.type_info = 0;
-
- spin_lock_irqsave(&free_list_lock, flags);
- list_add(&pfn_info->list, &free_list);
- free_pfns++;
- spin_unlock_irqrestore(&free_list_lock, flags);
+ free_domheap_page(page);
}
static void __free_shadow_table( struct mm_struct *m )
static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
{
m->shadow_page_count++;
- return alloc_domain_page(NULL);
+ return alloc_domheap_page();
}
void unshadow_table( unsigned long gpfn, unsigned int type )
static inline int is_free_domid(domid_t dom)
{
- struct domain *d;
+ struct domain *d;
- if (dom >= DOMID_SELF) return 0;
- d = find_domain_by_id(dom);
- if (d == NULL) {
- return 1;
- } else {
- put_domain(d);
+ if ( dom >= DOMID_SELF )
return 0;
- }
+
+ if ( (d = find_domain_by_id(dom)) == NULL )
+ return 1;
+
+ put_domain(d);
+ return 0;
}
/** Allocate a free domain id. We try to reuse domain ids in a fairly low range,
static domid_t curdom = 0;
static domid_t topdom = 101;
int err = 0;
- domid_t cur, dom, top;
+ domid_t dom;
- /* Try to use a domain id in the range 0..topdom, starting at curdom. */
spin_lock(&domid_lock);
- cur = curdom;
- dom = curdom;
- top = topdom;
- spin_unlock(&domid_lock);
- do {
- ++dom;
- if (dom == top) {
+
+ /* Try to use a domain id in the range 0..topdom, starting at curdom. */
+ for ( dom = curdom + 1; dom != curdom; dom++ )
+ {
+ if ( dom == topdom )
dom = 1;
- }
- if (is_free_domid(dom)) goto exit;
- } while (dom != cur);
+ if ( is_free_domid(dom) )
+ goto exit;
+ }
+
/* Couldn't find a free domain id in 0..topdom, try higher. */
- for (dom = top; dom < DOMID_SELF; dom++) {
- if(is_free_domid(dom)) goto exit;
+ for ( dom = topdom; dom < DOMID_SELF; dom++ )
+ {
+ if ( is_free_domid(dom) )
+ {
+ topdom = dom + 1;
+ goto exit;
+ }
}
+
/* No free domain ids. */
err = -ENOMEM;
+
exit:
- if (err == 0) {
- spin_lock(&domid_lock);
+ if ( err == 0 )
+ {
curdom = dom;
- if (dom >= topdom) {
- topdom = dom + 1;
- }
- spin_unlock(&domid_lock);
*pdom = dom;
}
+
+ spin_unlock(&domid_lock);
return err;
}
-#if 0
- struct domain *d;
- static domid_t domnr = 0;
- static spinlock_t domnr_lock = SPIN_LOCK_UNLOCKED;
- unsigned int pro;
- domid_t dom;
-
- ret = -ENOMEM;
-
- if(op->u.createdomain.domain > 0){
- d = find_domain_by_id(dom);
- if(d){
- put_domain(d);
- ret = -EINVAL;
- break;
- }
- } else {
- /* Search for an unused domain identifier. */
- for ( ; ; )
- {
- spin_lock(&domnr_lock);
- /* Wrap the roving counter when we reach first special value. */
- if ( (dom = ++domnr) == DOMID_SELF )
- dom = domnr = 1;
- spin_unlock(&domnr_lock);
-
- if ( (d = find_domain_by_id(dom)) == NULL )
- break;
- put_domain(d);
- }
- }
-#endif
-
long do_dom0_op(dom0_op_t *u_dom0_op)
{
long ret = 0;
unsigned int pro;
domid_t dom;
- ret = -ENOMEM;
-
dom = op->u.createdomain.domain;
- if ( 0 < dom && dom < DOMID_SELF )
+ if ( (dom > 0) && (dom < DOMID_SELF) )
{
+ ret = -EINVAL;
if ( !is_free_domid(dom) )
- {
- ret = -EINVAL;
break;
- }
- }
- else
- {
- ret = allocate_domid(&dom);
- if ( ret ) break;
}
+ else if ( (ret = allocate_domid(&dom)) != 0 )
+ break;
if ( op->u.createdomain.cpu == -1 )
pro = (unsigned int)dom % smp_num_cpus;
else
pro = op->u.createdomain.cpu % smp_num_cpus;
- d = do_createdomain(dom, pro);
- if ( d == NULL )
+ ret = -ENOMEM;
+ if ( (d = do_createdomain(dom, pro)) == NULL )
break;
if ( op->u.createdomain.name[0] )
pi->ht_per_core = ht;
pi->cores = smp_num_cpus / pi->ht_per_core;
pi->total_pages = max_page;
- pi->free_pages = free_pfns;
+ pi->free_pages = avail_domheap_pages();
pi->cpu_khz = cpu_khz;
copy_to_user(u_dom0_op, op, sizeof(*op));
struct pfn_info *page;
unsigned long i;
- /* Leave some slack pages; e.g., for the network. */
- if ( unlikely(free_pfns < (nr_pages + (SLACK_DOMAIN_MEM_KILOBYTES >>
- (PAGE_SHIFT-10)))) )
- {
- DPRINTK("Not enough slack: %u %u\n",
- free_pfns,
- SLACK_DOMAIN_MEM_KILOBYTES >> (PAGE_SHIFT-10));
- return 0;
- }
-
for ( i = 0; i < nr_pages; i++ )
{
- /* NB. 'alloc_domain_page' does limit-checking on pages per domain. */
if ( unlikely((page = alloc_domain_page(d)) == NULL) )
{
DPRINTK("Could not allocate a frame\n");
struct pfn_info *alloc_domain_page(struct domain *d)
{
struct pfn_info *page = NULL;
- unsigned long flags, mask, pfn_stamp, cpu_stamp;
+ unsigned long mask, pfn_stamp, cpu_stamp;
int i;
ASSERT(!in_irq());
- spin_lock_irqsave(&free_list_lock, flags);
- if ( likely(!list_empty(&free_list)) )
- {
- page = list_entry(free_list.next, struct pfn_info, list);
- list_del(&page->list);
- free_pfns--;
- }
- spin_unlock_irqrestore(&free_list_lock, flags);
-
+ page = alloc_domheap_page();
if ( unlikely(page == NULL) )
return NULL;
DPRINTK("Over-allocation for domain %u: %u >= %u\n",
d->domain, d->tot_pages, d->max_pages);
spin_unlock(&d->page_alloc_lock);
+ page->u.inuse.domain = NULL;
goto free_and_exit;
}
list_add_tail(&page->list, &d->page_list);
return page;
free_and_exit:
- spin_lock_irqsave(&free_list_lock, flags);
- list_add(&page->list, &free_list);
- free_pfns++;
- spin_unlock_irqrestore(&free_list_lock, flags);
+ free_domheap_page(page);
return NULL;
}
void free_domain_page(struct pfn_info *page)
{
- unsigned long flags;
int drop_dom_ref;
struct domain *d = page->u.inuse.domain;
page->u.inuse.count_info = 0;
- spin_lock_irqsave(&free_list_lock, flags);
- list_add(&page->list, &free_list);
- free_pfns++;
- spin_unlock_irqrestore(&free_list_lock, flags);
+ free_domheap_page(page);
}
if ( drop_dom_ref )
/* Grow the allocation if necessary. */
for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
{
- if ( unlikely((page=alloc_domain_page(d)) == NULL) ||
- unlikely(free_pfns < (SLACK_DOMAIN_MEM_KILOBYTES >>
- (PAGE_SHIFT-10))) )
+ if ( unlikely((page=alloc_domain_page(d)) == NULL) )
{
domain_relinquish_memory(d);
return -ENOMEM;
if ( rc != 0 )
{
d->pirq_to_evtchn[pirq] = 0;
- DPRINTK("Couldn't bind to PIRQ %d (error=%d)\n", pirq, rc);
goto out;
}
for ( ; ; ) ;
}
- ASSERT((sizeof(struct pfn_info) << 20) >
+ ASSERT((sizeof(struct pfn_info) << 20) <=
(FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
max_page >> (20-PAGE_SHIFT), max_page,
max_mem >> (20-PAGE_SHIFT));
- add_to_domain_alloc_list(dom0_memory_end, max_page << PAGE_SHIFT);
-
heap_start = memguard_init(&_end);
-
+ heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
+
+ init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
printk("Xen heap size is %luKB\n",
(xenheap_phys_end-__pa(heap_start))/1024 );
- init_page_allocator(__pa(heap_start), xenheap_phys_end);
-
+ init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
+
/* Initialise the slab allocator. */
xmem_cache_init();
xmem_cache_sizes_init(max_page);
panic("Could not set up DOM0 guest OS\n");
/* The stash space for the initial kernel image can now be freed up. */
- add_to_domain_alloc_list(__pa(frame_table) + frame_table_size,
- dom0_memory_start);
+ init_domheap_pages(__pa(frame_table) + frame_table_size,
+ dom0_memory_start);
init_trace_bufs();
unsigned long frame_table_size;
unsigned long max_page;
-struct list_head free_list;
-spinlock_t free_list_lock;
-unsigned int free_pfns;
-
extern void init_percpu_info(void);
void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
memset(frame_table, 0, frame_table_size);
- spin_lock_init(&free_list_lock);
- INIT_LIST_HEAD(&free_list);
- free_pfns = 0;
-
/* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
memset(machine_to_phys_mapping, 0x55, 4<<20);
mfn++ )
{
frame_table[mfn].u.inuse.count_info = 1 | PGC_allocated;
- frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW type */
+ frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW */
frame_table[mfn].u.inuse.domain = &idle0_task;
}
}
-
-
-void add_to_domain_alloc_list(unsigned long ps, unsigned long pe)
-{
- unsigned long i;
- unsigned long flags;
-
- spin_lock_irqsave(&free_list_lock, flags);
- for ( i = ps >> PAGE_SHIFT; i < (pe >> PAGE_SHIFT); i++ )
- {
- list_add_tail(&frame_table[i].list, &free_list);
- free_pfns++;
- }
- spin_unlock_irqrestore(&free_list_lock, flags);
-}
#include <xen/spinlock.h>
#include <xen/slab.h>
-static spinlock_t alloc_lock = SPIN_LOCK_UNLOCKED;
-
/*********************
* ALLOCATION BITMAP
ASSERT(!allocated_in_map(first_page + i));
#endif
- memguard_unguard_range(phys_to_virt(first_page << PAGE_SHIFT),
- nr_pages << PAGE_SHIFT);
-
curr_idx = first_page / PAGES_PER_MAPWORD;
start_off = first_page & (PAGES_PER_MAPWORD-1);
end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
ASSERT(allocated_in_map(first_page + i));
#endif
- memguard_guard_range(phys_to_virt(first_page << PAGE_SHIFT),
- nr_pages << PAGE_SHIFT);
-
curr_idx = first_page / PAGES_PER_MAPWORD;
start_off = first_page & (PAGES_PER_MAPWORD-1);
end_idx = (first_page + nr_pages) / PAGES_PER_MAPWORD;
* BINARY BUDDY ALLOCATOR
*/
-/* Linked lists of free chunks of different powers-of-two in size. */
-#define NR_ORDERS 11 /* Up to 2^10 pages can be allocated at once. */
-static struct list_head free_head[NR_ORDERS];
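+/* Pages are segregated into two zones: Xen's private heap and the domain heap. */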
+#define MEMZONE_XEN 0
+#define MEMZONE_DOM 1
+#define NR_ZONES 2
+
+/* Up to 2^10 pages can be allocated at once. */
+#define MIN_ORDER 0
+#define MAX_ORDER 10
+#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
+static struct list_head heap[NR_ZONES][NR_ORDERS];
+
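+/* Number of free pages currently held in each zone's free lists. */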
+static unsigned long avail[NR_ZONES];
#define round_pgdown(_p) ((_p)&PAGE_MASK)
#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
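+/* One lock protects both zones' free lists and the allocation bitmap. */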
+static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
-/*
- * Initialise allocator, placing addresses [@min,@max] in free pool.
- * @min and @max are PHYSICAL addresses.
- */
-void __init init_page_allocator(unsigned long min, unsigned long max)
+
+/* Initialise allocator to handle up to @max_pages. */
+unsigned long init_heap_allocator(
+ unsigned long bitmap_start, unsigned long max_pages)
{
- int i;
- unsigned long range, bitmap_size;
- struct pfn_info *pg;
+ int i, j;
+ unsigned long bitmap_size;
+
+ memset(avail, 0, sizeof(avail));
- for ( i = 0; i < NR_ORDERS; i++ )
- INIT_LIST_HEAD(&free_head[i]);
+ for ( i = 0; i < NR_ZONES; i++ )
+ for ( j = 0; j < NR_ORDERS; j++ )
+ INIT_LIST_HEAD(&heap[i][j]);
- min = round_pgup (min);
- max = round_pgdown(max);
+ bitmap_start = round_pgup(bitmap_start);
/* Allocate space for the allocation bitmap. */
- bitmap_size = (max+1) >> (PAGE_SHIFT+3);
+ bitmap_size = max_pages / 8;
bitmap_size = round_pgup(bitmap_size);
- alloc_bitmap = (unsigned long *)phys_to_virt(min);
- min += bitmap_size;
- range = max - min;
+ alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
/* All allocated by default. */
memset(alloc_bitmap, ~0, bitmap_size);
+
+ return bitmap_start + bitmap_size;
+}
+
+/* Hand the specified arbitrary page range to the specified heap zone. */
+void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&heap_lock, flags);
+
/* Free up the memory we've been given to play with. */
- map_free(min>>PAGE_SHIFT, range>>PAGE_SHIFT);
+ map_free(page_to_pfn(pg), nr_pages);
+ avail[zone] += nr_pages;
- pg = &frame_table[min >> PAGE_SHIFT];
- while ( range != 0 )
+ while ( nr_pages != 0 )
{
/*
* Next chunk is limited by alignment of pg, but also must not be
- * bigger than remaining bytes.
+ * bigger than the number of remaining pages.
*/
- for ( i = 0; i < NR_ORDERS; i++ )
+ for ( i = 0; i < MAX_ORDER; i++ )
if ( ((page_to_pfn(pg) & (1 << i)) != 0) ||
- ((1 << (i + PAGE_SHIFT + 1)) > range) )
+ ((1 << (i + 1)) > nr_pages) )
break;
PFN_ORDER(pg) = i;
- list_add_tail(&pg->list, &free_head[i]);
+ list_add_tail(&pg->list, &heap[zone][i]);
- pg += 1 << i;
- range -= 1 << (i + PAGE_SHIFT);
+ pg += 1 << i;
+ nr_pages -= 1 << i;
}
+
+ spin_unlock_irqrestore(&heap_lock, flags);
}
-/* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
-unsigned long alloc_xenheap_pages(int order)
+/* Allocate 2^@order contiguous pages. */
+struct pfn_info *alloc_heap_pages(int zone, int order)
{
- int i, attempts = 0;
+ int i;
struct pfn_info *pg;
unsigned long flags;
-retry:
- spin_lock_irqsave(&alloc_lock, flags);
+ spin_lock_irqsave(&heap_lock, flags);
/* Find smallest order which can satisfy the request. */
for ( i = order; i < NR_ORDERS; i++ )
- if ( !list_empty(&free_head[i]) )
+ if ( !list_empty(&heap[zone][i]) )
break;
if ( i == NR_ORDERS )
goto no_memory;
- pg = list_entry(free_head[i].next, struct pfn_info, list);
+ pg = list_entry(heap[zone][i].next, struct pfn_info, list);
list_del(&pg->list);
/* We may have to halve the chunk a number of times. */
while ( i != order )
{
PFN_ORDER(pg) = --i;
- list_add_tail(&pg->list, &free_head[i]);
+ list_add_tail(&pg->list, &heap[zone][i]);
pg += 1 << i;
}
- map_alloc(page_to_pfn(pg), 1<<order);
+ map_alloc(page_to_pfn(pg), 1 << order);
+ avail[zone] -= 1 << order;
- spin_unlock_irqrestore(&alloc_lock, flags);
+ spin_unlock_irqrestore(&heap_lock, flags);
- return (unsigned long)page_to_virt(pg);
+ return pg;
no_memory:
- spin_unlock_irqrestore(&alloc_lock, flags);
-
- if ( attempts++ < 8 )
- {
- xmem_cache_reap();
- goto retry;
- }
-
- printk("Cannot handle page request order %d!\n", order);
- dump_slabinfo();
-
- return 0;
+ spin_unlock_irqrestore(&heap_lock, flags);
+ return NULL;
}
-/* Free 2^@order pages at VIRTUAL address @p. */
-void free_xenheap_pages(unsigned long p, int order)
+/* Free 2^@order set of pages. */
+void free_heap_pages(int zone, struct pfn_info *pg, int order)
{
unsigned long mask;
- struct pfn_info *pg = virt_to_page(p);
unsigned long flags;
- spin_lock_irqsave(&alloc_lock, flags);
+ spin_lock_irqsave(&heap_lock, flags);
- map_free(page_to_pfn(pg), 1<<order);
+ map_free(page_to_pfn(pg), 1 << order);
+ avail[zone] += 1 << order;
/* Merge chunks as far as possible. */
- for ( ; ; )
+ while ( order < MAX_ORDER )
{
mask = 1 << order;
}
PFN_ORDER(pg) = order;
- list_add_tail(&pg->list, &free_head[order]);
+ list_add_tail(&pg->list, &heap[zone][order]);
+
+ spin_unlock_irqrestore(&heap_lock, flags);
+}
+
+
+
+/*************************
+ * XEN-HEAP SUB-ALLOCATOR
+ */
- spin_unlock_irqrestore(&alloc_lock, flags);
+void init_xenheap_pages(unsigned long ps, unsigned long pe)
+{
+ ps = round_pgup(ps);
+ pe = round_pgdown(pe);
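+ /* Xen-heap pages stay guarded while free; alloc_xenheap_pages() unguards them. */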
+ memguard_guard_range(__va(ps), pe - ps);
+ init_heap_pages(MEMZONE_XEN, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+}
+
+unsigned long alloc_xenheap_pages(int order)
+{
+ struct pfn_info *pg;
+ int attempts = 0;
+
+ retry:
+ if ( unlikely((pg = alloc_heap_pages(MEMZONE_XEN, order)) == NULL) )
+ goto no_memory;
+ memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
+ return (unsigned long)page_to_virt(pg);
+
+ no_memory:
+ if ( attempts++ < 8 )
+ {
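+ /* Ask the slab allocator to release its cached pages, then retry. */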
+ xmem_cache_reap();
+ goto retry;
+ }
+
+ printk("Cannot handle page request order %d!\n", order);
+ dump_slabinfo();
+ return 0;
+}
+
+void free_xenheap_pages(unsigned long p, int order)
+{
+ memguard_guard_range((void *)p, 1 << (order + PAGE_SHIFT));
+ free_heap_pages(MEMZONE_XEN, virt_to_page(p), order);
+}
+
+
+
+/*************************
+ * DOMAIN-HEAP SUB-ALLOCATOR
+ */
+
+void init_domheap_pages(unsigned long ps, unsigned long pe)
+{
+ ps = round_pgup(ps);
+ pe = round_pgdown(pe);
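+ /* Unlike the Xen heap, domain-heap pages are not memguarded. */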
+ init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+}
+
+struct pfn_info *alloc_domheap_pages(int order)
+{
+ return alloc_heap_pages(MEMZONE_DOM, order);
+}
+
+void free_domheap_pages(struct pfn_info *pg, int order)
+{
+ free_heap_pages(MEMZONE_DOM, pg, order);
+}
+
+unsigned long avail_domheap_pages(void)
+{
+ return avail[MEMZONE_DOM];
}
#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#define ____cacheline_aligned __cacheline_aligned
-/*
- * Amount of slack domain memory to leave in system, in megabytes.
- * Prevents a hard out-of-memory crunch for things like network receive.
- */
-#define SLACK_DOMAIN_MEM_KILOBYTES 2048
-
/* Linkage for x86 */
#define asmlinkage __attribute__((regparm(0)))
#define __ALIGN .align 16,0x90
extern void arch_final_setup_guestos(
struct domain *d, full_execution_context_t *c);
-static inline void free_perdomain_pt(struct domain *d)
-{
- free_xenheap_page((unsigned long)d->mm.perdomain_pt);
-}
+extern void free_perdomain_pt(struct domain *d);
extern void domain_relinquish_memory(struct domain *d);
#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) \
do { \
- (_pfn)->u.inuse.domain = (_dom); \
+ (_pfn)->u.inuse.domain = (_dom); \
/* The incremented type count is intended to pin to 'writeable'. */ \
- (_pfn)->u.inuse.type_info = PGT_writeable_page | PGT_validated | 1; \
+ (_pfn)->u.inuse.type_info = PGT_writeable_page | PGT_validated | 1;\
wmb(); /* install valid domain ptr before updating refcnt. */ \
spin_lock(&(_dom)->page_alloc_lock); \
/* _dom holds an allocation reference */ \
- (_pfn)->u.inuse.count_info = PGC_allocated | 1; \
+ (_pfn)->u.inuse.count_info = PGC_allocated | 1; \
if ( unlikely((_dom)->xenheap_pages++ == 0) ) \
get_domain(_dom); \
spin_unlock(&(_dom)->page_alloc_lock); \
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
-extern struct list_head free_list;
-extern spinlock_t free_list_lock;
-extern unsigned int free_pfns;
extern unsigned long max_page;
void init_frametable(void *frametable_vstart, unsigned long nr_pages);
-void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domain_page(struct domain *p);
+struct pfn_info *alloc_domain_page(struct domain *d);
void free_domain_page(struct pfn_info *page);
int alloc_page_type(struct pfn_info *page, unsigned int type);
return rc;
}
-#define ASSERT_PAGE_IS_TYPE(_p, _t) \
- ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
+#define ASSERT_PAGE_IS_TYPE(_p, _t) \
+ ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
-#define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
+#define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
ASSERT(((_p)->u.inuse.count_info & PGC_count_mask) != 0); \
ASSERT((_p)->u.inuse.domain == (_d))
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT))
+#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - frame_table) < max_mapnr)
#ifndef __XEN_MM_H__
#define __XEN_MM_H__
-/* page_alloc.c */
-void init_page_allocator(unsigned long min, unsigned long max);
+#include <asm/mm.h>
+
+/* Generic allocator */
+unsigned long init_heap_allocator(
+ unsigned long bitmap_start, unsigned long max_pages);
+void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
+struct pfn_info *alloc_heap_pages(int zone, int order);
+void free_heap_pages(int zone, struct pfn_info *pg, int order);
+
+/* Xen suballocator */
+void init_xenheap_pages(unsigned long ps, unsigned long pe);
unsigned long alloc_xenheap_pages(int order);
void free_xenheap_pages(unsigned long p, int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0))
#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
-#include <asm/mm.h>
+/* Domain suballocator */
+void init_domheap_pages(unsigned long ps, unsigned long pe);
+struct pfn_info *alloc_domheap_pages(int order);
+void free_domheap_pages(struct pfn_info *pg, int order);
+unsigned long avail_domheap_pages(void);
+#define alloc_domheap_page() (alloc_domheap_pages(0))
+#define free_domheap_page(_p) (free_domheap_pages(_p,0))
#endif /* __XEN_MM_H__ */